x86: Clean up existing XSAVE support
author Keir Fraser <keir@xen.org>
Fri, 29 Oct 2010 17:04:46 +0000 (18:04 +0100)
committer Keir Fraser <keir@xen.org>
Fri, 29 Oct 2010 17:04:46 +0000 (18:04 +0100)
Signed-off-by: Han Weidong <weidong.han@intel.com>
Signed-off-by: Shan Haitao <haitao.shan@intel.com>
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/i387.c
xen/include/asm-x86/hvm/vcpu.h
xen/include/asm-x86/i387.h

index 94190d3079591da6c1c67909769188fa22e2334f..472df2f82e1fcbcce44aaaa5fe42ef36a3f4ba00 100644 (file)
@@ -814,7 +814,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
 
         xsave_init_save_area(xsave_area);
         v->arch.hvm_vcpu.xsave_area = xsave_area;
-        v->arch.hvm_vcpu.xfeature_mask = XSTATE_FP_SSE;
+        v->arch.hvm_vcpu.xcr0 = XSTATE_FP_SSE;
     }
 
     if ( (rc = vlapic_init(v)) != 0 )
@@ -2002,8 +2002,8 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
         if ( cpu_has_xsave )
         {
             /*
-             *  Fix up "Processor Extended State Enumeration". We only present
-             *  FPU(bit0) and SSE(bit1) to HVM guest for now.
+             *  Fix up "Processor Extended State Enumeration". We present
+             *  FPU(bit0), SSE(bit1) and YMM(bit2) to HVM guest for now.
              */
             *eax = *ebx = *ecx = *edx = 0;
             switch ( count )
@@ -2012,14 +2012,14 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
                 /* No HW defines bit in EDX yet. */
                 *edx = 0;
                 /* We only enable the features we know. */
-                *eax = xfeature_low;
+                *eax = xfeature_mask;
                 /* FP/SSE + XSAVE.HEADER + YMM. */
                 *ecx = 512 + 64 + ((*eax & XSTATE_YMM) ? XSTATE_YMM_SIZE : 0);
                 /* Let ebx equal ecx at present. */
                 *ebx = *ecx;
                 break;
             case 2:
-                if ( !(xfeature_low & XSTATE_YMM) )
+                if ( !(xfeature_mask & XSTATE_YMM) )
                     break;
                 *eax = XSTATE_YMM_SIZE;
                 *ebx = XSTATE_YMM_OFFSET;
index 750d0fba8bc3fb97687aa6d0be2b3e098984278b..24c2331f472df059a4257bfa4ecf0b17db934cc2 100644 (file)
@@ -2203,20 +2203,19 @@ static int vmx_handle_eoi_write(void)
 static int vmx_handle_xsetbv(u64 new_bv)
 {
     struct vcpu *v = current;
-    u64 xfeature = (((u64)xfeature_high) << 32) | xfeature_low;
     struct segment_register sreg;
 
     hvm_get_segment_register(v, x86_seg_ss, &sreg);
     if ( sreg.attr.fields.dpl != 0 )
         goto err;
 
-    if ( ((new_bv ^ xfeature) & ~xfeature) || !(new_bv & 1) )
+    if ( ((new_bv ^ xfeature_mask) & ~xfeature_mask) || !(new_bv & 1) )
         goto err;
 
-    if ( (xfeature & XSTATE_YMM & new_bv) && !(new_bv & XSTATE_SSE) )
+    if ( (xfeature_mask & XSTATE_YMM & new_bv) && !(new_bv & XSTATE_SSE) )
         goto err;
 
-    v->arch.hvm_vcpu.xfeature_mask = new_bv;
+    v->arch.hvm_vcpu.xcr0 = new_bv;
     set_xcr0(new_bv);
     return 0;
 err:
index 65fee7ac725e06536c9c9c20b73de891e68fa96b..fa16fa9c5c51578d9a7dbf2cd60bb4de4fcb470d 100644 (file)
@@ -142,7 +142,7 @@ void restore_fpu(struct vcpu *v)
 u32 xsave_cntxt_size;
 
 /* A 64-bit bitmask of the XSAVE/XRSTOR features supported by processor. */
-u32 xfeature_low, xfeature_high;
+u64 xfeature_mask;
 
 void xsave_init(void)
 {
@@ -186,15 +186,15 @@ void xsave_init(void)
          * We know FP/SSE and YMM about eax, and nothing about edx at present.
          */
         xsave_cntxt_size = ebx;
-        xfeature_low = eax & XCNTXT_MASK;
-        xfeature_high = 0;
-        printk("%s: using cntxt_size: 0x%x and states: %08x:%08x\n",
-            __func__, xsave_cntxt_size, xfeature_high, xfeature_low);
+        xfeature_mask = eax + ((u64)edx << 32);
+        xfeature_mask &= XCNTXT_MASK;
+        printk("%s: using cntxt_size: 0x%x and states: 0x%"PRIx64"\n",
+            __func__, xsave_cntxt_size, xfeature_mask);
     }
     else
     {
         BUG_ON(xsave_cntxt_size != ebx);
-        BUG_ON(xfeature_low != (eax & XCNTXT_MASK));
+        BUG_ON(xfeature_mask != (xfeature_mask & XCNTXT_MASK));
     }
 }
 
@@ -202,11 +202,7 @@ void xsave_init_save_area(void *save_area)
 {
     memset(save_area, 0, xsave_cntxt_size);
 
-    ((u16 *)save_area)[0] = 0x37f;   /* FCW   */
-    ((u16 *)save_area)[2] = 0xffff;  /* FTW   */
     ((u32 *)save_area)[6] = 0x1f80;  /* MXCSR */
-
-    ((struct xsave_struct *)save_area)->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
 }
 
 /*
index 52c6c09e559bbd91ea988c54afd7084755eca10a..53ef98320f98dcdc15ca232b3172dcd88ef14da3 100644 (file)
@@ -56,7 +56,7 @@ struct hvm_vcpu {
      * #NM handler, we XRSTOR the states we XSAVE-ed;
      */
     void *xsave_area;
-    uint64_t xfeature_mask;
+    uint64_t xcr0;
 
     struct vlapic       vlapic;
     s64                 cache_tsc_offset;
index 39e0e7df56dd9cc44717a6cc10e495a7ccaf2010..ba365c0b3caa04fff0ab655866f26277802857db 100644 (file)
@@ -15,7 +15,7 @@
 #include <asm/processor.h>
 
 extern unsigned int xsave_cntxt_size;
-extern u32 xfeature_low, xfeature_high;
+extern u64 xfeature_mask;
 
 extern void xsave_init(void);
 extern void xsave_init_save_area(void *save_area);
@@ -49,45 +49,41 @@ struct xsave_struct
 #define REX_PREFIX
 #endif
 
-static inline void xsetbv(u32 index, u64 xfeature_mask)
+static inline void xsetbv(u32 index, u64 xfeatures)
 {
-    u32 hi = xfeature_mask >> 32;
-    u32 lo = (u32)xfeature_mask;
+    u32 hi = xfeatures >> 32;
+    u32 lo = (u32)xfeatures;
 
     asm volatile (".byte 0x0f,0x01,0xd1" :: "c" (index),
             "a" (lo), "d" (hi));
 }
 
-static inline void set_xcr0(u64 xfeature_mask)
+static inline void set_xcr0(u64 xfeatures)
 {
-    xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeature_mask);
+    xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures);
 }
 
 static inline void xsave(struct vcpu *v)
 {
-    u64 mask = v->arch.hvm_vcpu.xfeature_mask | XSTATE_FP_SSE;
-    u32 lo = mask, hi = mask >> 32;
     struct xsave_struct *ptr;
 
     ptr =(struct xsave_struct *)v->arch.hvm_vcpu.xsave_area;
 
     asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x27"
         :
-        : "a" (lo), "d" (hi), "D"(ptr)
+        : "a" (-1), "d" (-1), "D"(ptr)
         : "memory");
 }
 
 static inline void xrstor(struct vcpu *v)
 {
-    u64 mask = v->arch.hvm_vcpu.xfeature_mask | XSTATE_FP_SSE;
-    u32 lo = mask, hi = mask >> 32;
     struct xsave_struct *ptr;
 
     ptr =(struct xsave_struct *)v->arch.hvm_vcpu.xsave_area;
 
     asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x2f"
         :
-        : "m" (*ptr), "a" (lo), "d" (hi), "D"(ptr));
+        : "m" (*ptr), "a" (-1), "d" (-1), "D"(ptr));
 }
 
 extern void init_fpu(void);
@@ -117,9 +113,9 @@ static inline void setup_fpu(struct vcpu *v)
             if ( !v->fpu_initialised )
                 v->fpu_initialised = 1;
 
-            set_xcr0(v->arch.hvm_vcpu.xfeature_mask | XSTATE_FP_SSE);
+            set_xcr0(v->arch.hvm_vcpu.xcr0 | XSTATE_FP_SSE);
             xrstor(v);
-            set_xcr0(v->arch.hvm_vcpu.xfeature_mask);
+            set_xcr0(v->arch.hvm_vcpu.xcr0);
         }
         else
         {